diff --git a/learntools/__init__.py b/learntools/__init__.py
index 710c86703..6c13d734c 100644
--- a/learntools/__init__.py
+++ b/learntools/__init__.py
@@ -2,4 +2,4 @@
     machine_learning, ml_explainability, ml_insights, ml_intermediate, python, \
     sql
 
-__version__ = '0.3.4'
+__version__ = '0.3.5'
diff --git a/learntools/core/tracking.py b/learntools/core/tracking.py
index cad62b6c7..69e34f18c 100644
--- a/learntools/core/tracking.py
+++ b/learntools/core/tracking.py
@@ -1,12 +1,18 @@
 import enum
 from IPython.display import display, Javascript
 import json
-
 import learntools
+import os
 
 # If set to True, then echo logged events as output.
 DEBUG = False
 
+USE_KAGGLESDK = os.environ.get('LEARN_USE_KAGGLE_SDK') == 'True'
+if USE_KAGGLESDK:
+    from kagglesdk import KaggleClient
+    from kagglesdk.education.types.education_api_service import ApiTrackExerciseInteractionRequest
+    from kagglesdk.education.types.education_service import LearnExerciseInteractionType, LearnExerciseOutcomeType, LearnExerciseQuestionType
+
 class InteractionType(enum.Enum):
     CHECK = 1
     HINT = 2
@@ -32,7 +38,72 @@ class QuestionType(enum.Enum):
     trace = '',
 )
 
-def track(event):
+def interaction_type_to_kagglesdk(event):
+    switch = {
+        InteractionType.CHECK: LearnExerciseInteractionType.CHECK,
+        InteractionType.HINT: LearnExerciseInteractionType.HINT,
+        InteractionType.SOLUTION: LearnExerciseInteractionType.SOLUTION,
+    }
+    value = event['interactionType']
+    assert value in switch
+    return switch.get(value)
+
+def outcome_type_to_kagglesdk(interaction_type, event):
+    switch = {
+        OutcomeType.PASS: LearnExerciseOutcomeType.PASS,
+        OutcomeType.FAIL: LearnExerciseOutcomeType.FAIL,
+        OutcomeType.EXCEPTION: LearnExerciseOutcomeType.EXCEPTION,
+        OutcomeType.UNATTEMPTED: LearnExerciseOutcomeType.UNATTEMPTED,
+    }
+
+    value = event.get('outcomeType', None)
+    if value:
+        assert value in switch
+        return switch.get(value)
+    else:
+        assert interaction_type != LearnExerciseInteractionType.CHECK, "Check events must have an OutcomeType set: {!r}".format(event)
+        return LearnExerciseOutcomeType.LEARN_EXERCISE_OUTCOME_TYPE_UNSPECIFIED
+
+def question_type_to_kagglesdk(event):
+    switch = {
+        QuestionType.EQUALITYCHECKPROBLEM: LearnExerciseQuestionType.EQUALITY_CHECK_PROBLEM,
+        QuestionType.CODINGPROBLEM: LearnExerciseQuestionType.CODING_PROBLEM,
+        QuestionType.FUNCTIONPROBLEM: LearnExerciseQuestionType.FUNCTION_PROBLEM,
+        QuestionType.THOUGHTEXPERIMENT: LearnExerciseQuestionType.THOUGHT_EXPERIMENT,
+    }
+
+    question_type = event.get('questionType', None)
+    if question_type:
+        assert question_type in switch
+        return switch.get(question_type)
+    return None
+
+def track_using_kagglesdk(event):
+    request = ApiTrackExerciseInteractionRequest()
+    request.learn_tools_version = str(learntools.__version__)
+    request.value_towards_completion = event.get('valueTowardsCompletion', 0.0)
+    request.interaction_type = interaction_type_to_kagglesdk(event)
+    request.outcome_type = outcome_type_to_kagglesdk(request.interaction_type, event)
+    request.fork_parent_kernel_session_id = int(os.environ.get('KAGGLE_LEARN_SESSION_ID'))
+
+    question_type = question_type_to_kagglesdk(event)
+    if question_type:
+        request.question_type = question_type
+
+    client = KaggleClient()
+    result = client.education.education_api_client.track_exercise_interaction(request)
+
+    # Post the result back to the outer frame. When running in Kaggle
+    # Notebooks, the outer frame is listening for this message and may show a
+    # nudge.
+    message = dict(
+        jupyterEvent='custom.exercise_interaction_result',
+        data=result.to_json())
+    js = 'parent.postMessage({}, "*")'.format(json.dumps(message))
+    display(Javascript(js))
+
+
+def track_using_iframe(event):
     # TODO: could be nice to put some validation logic here.
     for k, v in _EVENT_DEFAULTS.items():
         event.setdefault(k, v)
@@ -65,3 +136,9 @@ def track(event):
         display(Javascript(debug_js))
 
     display(message)
+def track(event):
+    if USE_KAGGLESDK:
+        track_using_kagglesdk(event)
+    else:
+        track_using_iframe(event)
+
\ No newline at end of file
diff --git a/learntools/sql/ex6.py b/learntools/sql/ex6.py
index ffd11814f..ea04630e1 100644
--- a/learntools/sql/ex6.py
+++ b/learntools/sql/ex6.py
@@ -186,14 +186,17 @@ def check(self, query, results):
         assert ('group by' in lower_query), ('Your query should have a **GROUP BY** clause.')
         assert ('count' in lower_query), ('Your query should have a **COUNT** in the **SELECT** statement.')
         assert ('%bigquery' in lower_query), ('Your **WHERE** clause is not filtering on the "bigquery" tag correctly.')
 
+        # check 2: column names
         results.columns = [c.lower() for c in results.columns]
         assert ('user_id' in results.columns), ('You do not have a `user_id` column in your results.')
         assert ('number_of_answers' in results.columns), ('You do not have a `number_of_answers` column in your results.')
 
+        # check 3: correct user IDs
         correct_ids = bigquery_experts_answer.loc[bigquery_experts_answer.user_id.notna(), "user_id"].unique()
         submitted_ids = results.loc[results.user_id.notna(), "user_id"].unique()
-        assert np.array_equal(correct_ids, submitted_ids), 'You seem to have the wrong values in the `user_id` column.'
+        assert np.array_equal(np.sort(correct_ids), np.sort(submitted_ids)), 'You seem to have the wrong values in the `user_id` column.'
 
+        # check 4: check one value from other column
         first_id = list(bigquery_experts_answer["user_id"])[0]
         correct_num = int(bigquery_experts_answer[bigquery_experts_answer["user_id"] == first_id]["number_of_answers"])
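
Usage sketch (not part of the patch): how the new `track()` dispatch in `learntools/core/tracking.py` gets exercised. This assumes `kagglesdk` is installed and a Kaggle-like environment is reachable; the session id `12345` is a placeholder. Note that `USE_KAGGLESDK` is evaluated at import time, so both environment variables must be set before `learntools.core.tracking` is imported.

```python
import os

# LEARN_USE_KAGGLE_SDK gates the SDK path at import time, and
# KAGGLE_LEARN_SESSION_ID is parsed with int() inside track_using_kagglesdk(),
# so it must be set to a numeric string or track() will raise.
os.environ['LEARN_USE_KAGGLE_SDK'] = 'True'
os.environ['KAGGLE_LEARN_SESSION_ID'] = '12345'  # placeholder session id

from learntools.core import tracking

# Event keys match those read by the converters in this patch; per the assert
# in outcome_type_to_kagglesdk(), a CHECK event must carry an outcomeType.
tracking.track(dict(
    interactionType=tracking.InteractionType.CHECK,
    questionType=tracking.QuestionType.CODINGPROBLEM,
    outcomeType=tracking.OutcomeType.PASS,
    valueTowardsCompletion=1.0,
))
```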
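
Why the `np.sort` change in `learntools/sql/ex6.py`: `np.array_equal` compares element by element in order, so a correct result set returned in a different row order failed check 3. A minimal standalone illustration (the id values are made up):

```python
import numpy as np

correct_ids = np.array([87, 42, 99])
submitted_ids = np.array([42, 87, 99])  # same IDs, different row order

# Old check: positional comparison rejects an equally valid ordering.
assert not np.array_equal(correct_ids, submitted_ids)

# New check: sorting both sides makes the comparison order-insensitive.
assert np.array_equal(np.sort(correct_ids), np.sort(submitted_ids))
```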